In [ ]:
%matplotlib inline
import os
import sys
import datetime
import warnings
import csv
import numpy as np
import matplotlib.pyplot as plt
import pandas
import seaborn
seaborn.set(style='ticks', context='paper')
import wqio
import pybmpdb
import pynsqd
import pycvc
# minimum precipitation depth for a storm to be included in the analysis
min_precip = 1.9999
# date of the very large storm whose volumes are replaced with modeled values below
big_storm_date = datetime.date(2013, 7, 8)
palette = seaborn.color_palette('deep', n_colors=6)
pybmpdb.setMPLStyle()
# names of the POCs flagged for inclusion in pycvc.info.POC_dicts
POCs = [
    p['cvcname']
    for p in filter(
        lambda p: p['include'],
        pycvc.info.POC_dicts
    )
]
if wqio.testing.checkdep_tex() is None:
    tex_msg = ("LaTeX not found on system path. You will "
               "not be able to compile ISRs to PDF files")
    warnings.warn(tex_msg, UserWarning)
warning_filter = "ignore"
warnings.simplefilter(warning_filter)
In [ ]:
# external datasets (International Stormwater BMP Database and the National
# Stormwater Quality Database), each with its own plot color and marker
bmpdb = pycvc.external.bmpdb(palette[3], 'D')
nsqdata = pycvc.external.nsqd(palette[2], 'd')
In [ ]:
# path to the CVC Access database (adjust for your own system)
cvcdbfile = "C:/users/phobson/Desktop/scratch/cvc/cvc.accdb"
cvcdb = pycvc.Database(cvcdbfile, nsqdata, bmpdb)
In [ ]:
LV1 = pycvc.Site(db=cvcdb, siteid='LV-1', raingauge='LV-1', tocentry='Lakeview Control',
                 isreference=True, minprecip=min_precip, color=palette[1], marker='s')
In [ ]:
def LV1_runoff(row):
    return LV1.drainagearea.simple_method(row['total_precip_depth'], volume_conversion=0.001)
LV1.runoff_fxn = LV1_runoff
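The `simple_method` call above appears to be the classic stormwater "Simple Method": precipitation depth spread over the contributing drainage area and scaled by a runoff coefficient. Below is a minimal, standalone sketch of that arithmetic; it is not wqio's actual implementation, the `simple_method_sketch` name and the depth, area, and coefficient values are made up, and it assumes depths in mm and areas in m².
In [ ]:
def simple_method_sketch(precip_depth_mm, area_m2, runoff_coeff):
    # depth [mm] times area [m^2] gives liters; the 0.001 factor converts
    # liters to cubic meters, analogous to volume_conversion=0.001 above
    runoff_liters = precip_depth_mm * area_m2 * runoff_coeff
    return runoff_liters * 0.001
simple_method_sketch(25.0, 1000.0, 0.8)  # hypothetical 25 mm storm over a 1000 m2 catchment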
In [ ]:
def rename_influent_cols(col):
    # index-like columns keep their (lowercased) names; everything else is
    # prefixed with 'influent' and any ' nsqd '/' effluent ' tokens are dropped
    if col.lower() in ['parameter', 'units', 'season']:
        newcol = col.lower()
    else:
        newcol = 'influent {}'.format(col.lower())
    return newcol.replace(' nsqd ', ' ').replace(' effluent ', ' ')
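A quick check of the renamer on a few hypothetical column names shows the intent: the index-like columns are simply lowercased, while everything else becomes an "influent ..." column with the dataset-specific tokens stripped out.
In [ ]:
for col in ['Parameter', 'Season', 'NSQD Median', 'effluent median']:
    print(col, '->', rename_influent_cols(col))
# -> parameter, season, influent median, influent median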
In [ ]:
# seasonal medians at LV-1 (the reference site) serve as the influent
# estimates for the Lakeview sites
LV_Influent = (
    LV1.medians("concentration", groupby_col='season')
       .rename(columns={'effluent stat': 'median'})
       .rename(columns=rename_influent_cols)
)
LV1.influentmedians = LV_Influent
LV_Influent.head()
In [ ]:
# NSQD seasonal medians serve as the influent estimates for Elm Drive
ED_Influent = (
    cvcdb.nsqdata
         .seasonal_medians
         .rename(columns=rename_influent_cols)
)
ED_Influent.head()
In [ ]:
ED1 = pycvc.Site(db=cvcdb, siteid='ED-1', raingauge='ED-1',
                 tocentry='Elm Drive', influentmedians=ED_Influent,
                 minprecip=min_precip, isreference=False,
                 color=palette[0], marker='o')
LV2 = pycvc.Site(db=cvcdb, siteid='LV-2', raingauge='LV-1',
                 tocentry='Lakeview Grass Swale', influentmedians=LV_Influent,
                 minprecip=min_precip, isreference=False,
                 color=palette[4], marker='^')
LV4 = pycvc.Site(db=cvcdb, siteid='LV-4', raingauge='LV-1',
                 tocentry=r'Lakeview Bioswale 1$^{\mathrm{st}}$ South Side',
                 influentmedians=LV_Influent,
                 minprecip=min_precip, isreference=False,
                 color=palette[5], marker='v')
In [ ]:
def ED1_runoff(row):
    return ED1.drainagearea.simple_method(row['total_precip_depth'], volume_conversion=0.001)
def ED1_inflow(row):
    return ED1_runoff(row)
ED1.runoff_fxn = ED1_runoff
ED1.inflow_fxn = ED1_inflow
In [ ]:
def LV2_runoff(row):
    return LV2.drainagearea.simple_method(row['total_precip_depth'], volume_conversion=0.001)
def LV2_inflow(row):
    return LV2_runoff(row)
LV2.runoff_fxn = LV2_runoff
LV2.inflow_fxn = LV2_inflow
In [ ]:
def LV4_runoff(row):
    return LV4.drainagearea.simple_method(row['total_precip_depth'], volume_conversion=0.001)
def LV4_inflow(row):
    return LV4_runoff(row)
LV4.runoff_fxn = LV4_runoff
LV4.inflow_fxn = LV4_inflow
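The three runoff/inflow definitions above differ only in which site they reference. If desired, a small factory could build them instead; this is only a sketch of that refactor (the `make_runoff_fxn` helper is a made-up name, not part of pycvc), with behavior identical to the cells above.
In [ ]:
def make_runoff_fxn(site):
    def _runoff(row):
        return site.drainagearea.simple_method(row['total_precip_depth'], volume_conversion=0.001)
    return _runoff
for site in [ED1, LV2, LV4]:
    site.runoff_fxn = make_runoff_fxn(site)
    site.inflow_fxn = make_runoff_fxn(site)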
In [ ]:
# manually split the 2012-08-10/11 record into two distinct events: mark the lull
# between them as storm 0 (not part of any storm), then bump every later storm number by one
ED1.hydrodata.data.loc['2012-08-10 23:50:00':'2012-08-11 05:20', 'storm'] = 0
ED1.hydrodata.data.loc['2012-08-11 05:30':, 'storm'] += 1
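An optional check on the same dataframe confirms that the records after 05:30 now carry a new storm number:
In [ ]:
ED1.hydrodata.data.loc['2012-08-11 05:00':'2012-08-11 06:00', 'storm'].drop_duplicates()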
In [ ]:
# inflow and outflow volumes for this storm, taken from the spreadsheet model
modeled_inflow_Liters = 430603
modeled_outflow_Liters = 250965
# select the big storm
bigstorm = ED1.storm_info.loc[ED1.storm_info.start_date.dt.date == big_storm_date].iloc[0]
# overwrite values in the storm_info dataframe
ED1.storm_info.loc[bigstorm.name, 'inflow_m3'] = modeled_inflow_Liters / pycvc.info.LITERS_PER_CUBICMETER
ED1.storm_info.loc[bigstorm.name, 'outflow_m3'] = modeled_outflow_Liters / pycvc.info.LITERS_PER_CUBICMETER
# modify the volumes in the individual storm objects
ED1.storms[bigstorm.storm_number].total_inflow_volume = modeled_inflow_Liters / pycvc.info.LITERS_PER_CUBICMETER
ED1.storms[bigstorm.storm_number].total_outflow_volume = modeled_outflow_Liters / pycvc.info.LITERS_PER_CUBICMETER
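Assuming `LITERS_PER_CUBICMETER` is 1000, the overridden values work out to roughly 430.6 m³ of inflow and 251.0 m³ of outflow. An optional spot check that `storm_info` now carries the modeled volumes:
In [ ]:
ED1.storm_info.loc[bigstorm.name, ['inflow_m3', 'outflow_m3']]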
In [ ]:
hydro = pycvc.summary.collect_tidy_data(
    [ED1, LV1, LV2, LV4],
    lambda s: s.tidy_hydro
)
hydro.to_csv('output/tidy/hydro_simple.csv', index=False)
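Note that `to_csv` will raise an error if the `output/tidy/` folder does not exist yet. Since `os` is already imported, a one-line guard (run once, before this cell and the next) can create it; this is an optional convenience, not part of the original workflow.
In [ ]:
os.makedirs('output/tidy', exist_ok=True)  # no-op if the folder already exists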
In [ ]:
wq = pycvc.summary.collect_tidy_data(
    [ED1, LV1, LV2, LV4],
    lambda s: s.tidy_wq
)
wq.to_csv('output/tidy/wq_simple.csv', index=False)
In [ ]:
for site in [ED1, LV1, LV2, LV4]:
    print('\n----Compiling ISR for {0}----'.format(site.siteid))
    site.allISRs('composite', version='draft')
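As the warning in the setup cell indicates, compiling the ISRs to PDF requires LaTeX on the system path; if `wqio.testing.checkdep_tex()` returned `None` above, this cell will not be able to produce the PDF files.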